% NOTE(review): this chunk appears to be an extraction from a longer script and
% is NOT syntactically complete MATLAB as shown: several `catch`, `case`, `else`
% and `end` lines seem to have been lost (flagged inline below), and the single
% `end` at the bottom cannot close every construct opened here. Code is left
% byte-identical; only comments were added or improved.
%
% Simulated real-time loop over the whole speech stimulus (L samples):
%  * pushes each new sample into FIFO buffers (gamma amplitude, theta phase,
%    speech stimulus);
%  * every `overlap` samples, once `window_size` samples have been acquired,
%    computes bias-corrected theta-gamma mutual information (MI) separately
%    for "word" and "rest" samples inside the current window;
%  * compares word MI against rest MI (and past word MI) to choose positive or
%    negative image feedback, then performs a binomial Bayesian update of the
%    probability of success p0 by fitting a binopdf with fminsearch.
% Assumes in workspace (not visible in this chunk): L, fs, window_size,
% overlap, GammaAmpl, ThetaPhase, StimulusSpeech, eeg_buffer* arrays,
% idx_word, idx_rest, MI(), Feedback, filenamesFeedback, Prob, xBinoPrior,
% xBinoPrior1, binoPrior, p0, p1, p_initial, param, h, p0_cum -- TODO confirm.
for t = 1:L % During all speech stimulus
% Simulate incoming data (new sample): take sample t of each precomputed signal
new_sampleGamma = GammaAmpl(t); new_sampleTheta = ThetaPhase(t);
new_sampleSpeech = StimulusSpeech(t);
% Shift each buffer up one row and append the new sample at the end
% (FIFO buffer emulation; O(buffer length) copy per incoming sample)
eeg_bufferGamma(1:end-1, :) = eeg_bufferGamma(2:end, :);
eeg_bufferGamma(end, :) = new_sampleGamma;
eeg_bufferTheta(1:end-1, :) = eeg_bufferTheta(2:end, :);
eeg_bufferTheta(end, :) = new_sampleTheta;
eeg_bufferSpeech(1:end-1, :) = eeg_bufferSpeech(2:end, :);
eeg_bufferSpeech(end, :) = new_sampleSpeech;
% Automatic detection of word appearance in user's environment
idx = t; % a word appears
% NOTE(review): `idx` is assigned here but `idx_word`/`idx_rest` are what the
% window classification below uses -- confirm this placeholder is intentional
% Process data when we have enough samples for buffer-like moving one window
if mod(t, overlap) == 0 && t >= window_size
% fires every `overlap` samples (half a window) once a full window is acquired
% t_currentWindowTot = [t_currentWindowTot; t];
disp(['Buffer-like moving window at: ', num2str(t/fs), ' seconds'])
% Get current window of data (last window_size rows of each buffer)
current_windowGamma = eeg_bufferGamma(end-window_size+1:end, :);
current_windowTheta = eeg_bufferTheta(end-window_size+1:end, :);
% current_windowStim = eeg_bufferSpeech(end-window_size+1:end, :);
% Identify samples that correspond to rest or speech processing in
% the current window (absolute sample indices covered by this window)
t_Window=t-window_size+1:t;
idx_word_currentWindow = find(ismember(t_Window, idx_word)); %at least part of a word is present in the current window
% NOTE(review): linear indexing below is only correct if the window is a
% single-channel column vector (N x 1) -- confirm buffer shape
current_windowGammaWord = current_windowGamma(idx_word_currentWindow);
current_windowThetaWord = current_windowTheta(idx_word_currentWindow);
% current_windowStimWord = current_windowStim(idx_word_currentWindow);
idx_rest_currentWindow = find(ismember(t_Window, idx_rest));
current_windowGammaRest = current_windowGamma(idx_rest_currentWindow);
current_windowThetaRest = current_windowTheta(idx_rest_currentWindow);
% Compute MI during word processing
try [mi_Word, entropy, fd_bins_Word] = MI(current_windowGammaWord,...
current_windowThetaWord,[]);
% Having little amount of data may inflate MI.
% Compute inflation factor (bias term from sample count and bin count)
% and subtract it from MI
n_Word = size(current_windowGammaWord,1);
mutinfo_error_Word = (fd_bins_Word-1).^2./(2.*n_Word.*log(2));
mi_corrected_Word{1,t} = mi_Word-mutinfo_error_Word;
% Compact the cell array so it holds one value per processed word window
mi_corrected_Word = mi_corrected_Word(~cellfun('isempty',mi_corrected_Word));
% NOTE(review): the NaN resets below presumably belong to a `catch` branch of
% the `try` above that was lost in extraction; as written they would clobber
% the values just computed -- confirm against the full script
mi_Word = NaN; entropy= NaN; fd_bins_Word = NaN;
% Compute MI during rest (same bias-corrected procedure as for word samples)
try [mi_Rest, entropy, fd_bins_Rest] = MI(current_windowGammaRest,...
current_windowThetaRest,[]);
% Having little amount of data may inflate MI.
% Compute inflation factor and subtract it from MI
n_Rest = size(current_windowGammaRest,1);
mutinfo_error_Rest = (fd_bins_Rest-1).^2./(2.*n_Rest.*log(2));
mi_corrected_Rest{1,t} = mi_Rest-mutinfo_error_Rest;
mi_corrected_Rest = mi_corrected_Rest(~cellfun('isempty',mi_corrected_Rest));
% NOTE(review): as above, these NaN resets look like a lost `catch` branch
mi_Rest = NaN; entropy= NaN; fd_bins_Rest = NaN;
% Feedback stage: only when the window holds word samples and rest MI exists
if ~isempty(current_windowGammaWord) && ~isempty(mi_corrected_Rest)
idx_currentWordWindow = size(mi_corrected_Word,2); % word windows so far
mi_rest = cell2mat(mi_corrected_Rest); % rest MI values as numeric vector
% NOTE(review): MATLAB requires a `case` immediately after `switch`; a
% `case 1` line (the "first word window" branch) appears to be missing
% between the `switch` and the `if` below
switch idx_currentWordWindow
% First window with a word. Output positive image with probability 1 if
% the current word MI exceeds the mean rest MI
if mi_corrected_Word{1,idx_currentWordWindow} > mean(mi_rest)
feature = 'ok'; % conditions are met for a positive image
% NOTE(review): an `else` appears to be missing here; as written 'not ok'
% unconditionally overwrites 'ok'
feature = 'not ok'; % conditions are met for a negative image
cp = [0, cumsum(Prob)]; %cumulative sum of probabilities -
% if an image has higher probability, it will be more likely that a
% random number would be lower, so the index of r > cp from last
% item would be the one of its prob - 1 (because starts with 0)
r = rand; %random number between 0 and 1
ind = find(r>cp, 1, 'last'); image = Feedback(ind); % sample image per Prob
filename = filenamesFeedback{1,image+1};
imageDisp = imread(filename);
imageFeed = figure(); imshow(imageDisp); % display chosen feedback image
disp(['Feedback: ', filename]);
% Likelihoods for the binomial update: prior support for the positive
% outcome, reversed support for the negative outcome
BinoLikelihood0 = xBinoPrior;
BinoLikelihood1 = xBinoPrior1(end:-1:1);
% Output likelihood distribution
figure('WindowState','maximized')
bar(xBinoPrior,param,'FaceAlpha',0.3) % NOTE(review): `param` is not defined in this chunk -- confirm it holds the likelihood being plotted
xlabel('Proportion of positive image'); ylabel('Probability of image appearance')
% Bayes update: prior times likelihood, divided by normalising constant
% (NOTE(review): p0 used as the normaliser here -- confirm)
binoPosterior = (binoPrior.*BinoLikelihood0)/p0;
% Save cumulated posterior distributions
binoPosterior_cum{1,t} = binoPosterior;
binoPosterior_cum = binoPosterior_cum(~cellfun('isempty',binoPosterior_cum));
idx_currentbinoPosterior = size(binoPosterior_cum,2);
% Output posterior distribution (prior and posterior overlaid)
figure('WindowState','maximized')
bar(xBinoPrior,binoPrior,'FaceAlpha',0.3)
% NOTE(review): a `hold on` between the two bar() calls is presumably needed
% for the legend below to show both series -- confirm
bar(xBinoPrior,binoPosterior,'FaceAlpha',0.3)
legend('Prior', 'Posterior')
xlabel('Proportion of positive image');ylabel('Probability of image appearance');
% Compute new probability of success
% Number of trials (from the length of pdf_values - 1)
n = length(binoPosterior) - 1;
% Objective function to minimize: squared error between the posterior and
% a binomial pdf with success probability p
objective_function = @(p) sum((binoPosterior - binopdf(0:n, n, p)).^2);
% Use fminsearch to find the optimal p
[p0,fval,exitflag,output] = fminsearch(objective_function, p_initial);
disp(['Posterior probability of success (p): ', num2str(p0)]);
% Refresh the live plot of p over time (h is a plot handle from outside)
set(h, 'XData', (1:t)/fs, 'YData', p0_cum(1:t));
otherwise %Not first word window
close(imageFeed); %remove previous feedback
mi_corrected_WorddB = cell2mat(mi_corrected_Word); % past word MI as vector
% if MI greater than rest AND greater than the average
% past MI during word. There was an improvement in MI
if mi_corrected_Word{1,idx_currentWordWindow} > mean(mi_rest) && ...
mi_corrected_Word{1,idx_currentWordWindow} >...
mean(mi_corrected_WorddB(1:idx_currentWordWindow-1))
% increases the probability of positive feedback according to EEG data
% probability of positive feedback = p0 (posterior modelling of the user)
% if conditions of MI are met --> increases it
p0a = p0+((1/4)*p1); % ensures that it will never be more than 1
feature = 'ok'; % conditions are met for a positive image
else %there was no improvement in MI
% increases the probability of negative feedback according to EEG data
% probability of negative feedback = p1 (posterior modelling of the user)
p1a = p1+((1/4)*p0); % ensures that it will never be more than 1
feature = 'not ok'; % conditions are met for a negative image
% NOTE(review): the `end` closing the if/else above seems to be missing;
% also only one of p0a/p1a is assigned per branch, so `Prob = [p0a, p1a]`
% may hit an undefined variable on a first pass -- confirm full script
Prob = [p0a, p1a]; cp = [0, cumsum(Prob)];
r = rand; %random number between 0 and 1
ind = find(r>cp, 1, 'last'); image = Feedback(ind);
filename = filenamesFeedback{1,image+1};
imageDisp = imread(filename);
imageFeed = figure(); imshow(imageDisp);
disp(['Feedback: ', filename]);
disp('Improvement in MI: Yes');
BinoLikelihood0 = xBinoPrior;
% NOTE(review): a `switch feature` header appears to be missing above the
% `case` lines that follow
case 'not ok' % Goal: do not affect the probability of success that much
disp('Improvement in MI: No');
% Considers the possibility of cognitive internal factors
% for likelihood (beyond sensory sampling)
% Square root the elements of the vector
% Highest weight on proportion for positive image, but does not reach 1
% and nonlinear decrease (slower than linear) towards proportion of negative images
BinoPrior_sqrt = sqrt(xBinoPrior);
% Calculate the sum of the original vector and the square-rooted vector
sum_BinoPrior = sum(xBinoPrior); sum_BinoPrior_squared = sum(BinoPrior_sqrt);
% Calculate the scaling factor so the transformed vector keeps the same total
scaling_factor = sum_BinoPrior / sum_BinoPrior_squared;
% Scale the square-rooted vector by the scaling factor
xBinoPrior_1_ok = BinoPrior_sqrt * scaling_factor;
BinoLikelihood0 = xBinoPrior_1_ok;
% Highlight the distribution that needs to be multiplied by the priors
case 'ok' % Goal: do not affect the probability of success that much
disp('Improvement in MI: Yes');
% Considers the possibility of cognitive internal factors
% for likelihood (beyond sensory sampling)
% Square root the elements of the vector
% Highest weight on proportion for negative image, but does not reach 1
% and nonlinear decrease (slower than linear) towards proportion of positive image
BinoPrior_sqrt = sqrt(xBinoPrior);
% Calculate the sum of the original vector and the square-rooted vector
sum_BinoPrior = sum(xBinoPrior); sum_BinoPrior_squared = sum(BinoPrior_sqrt);
% Calculate the scaling factor
scaling_factor = sum_BinoPrior / sum_BinoPrior_squared;
% Scale the square-rooted vector by the scaling factor
xBinoPrior_1_ok = BinoPrior_sqrt * scaling_factor;
BinoLikelihood1 = xBinoPrior_1_ok(end:-1:1); % reversed for negative outcome
% NOTE(review): the two lines below contradict the 'ok' case just above
% ('No' vs 'Yes', and BinoLikelihood1 reassigned) -- an intervening case or
% else boundary was likely lost in extraction
disp('Improvement in MI: No');
BinoLikelihood1 = xBinoPrior(end:-1:1);
% Highlight the distribution that needs to be multiplied by the priors
% Output likelihood distribution
figure('WindowState','maximized')
bar(xBinoPrior,param,'FaceAlpha',0.3) % NOTE(review): `param` not defined in this chunk -- confirm
xlabel('Proportion of positive image'); ylabel('Probability of image appearance')
% Multiply the running posterior by the likelihood, then renormalise below
Posterior = binoPosterior.*param;
% Scaling factor so that it sums to 1
scaling_factor = 1 / sum(Posterior);
% Scale the vector by the scaling factor (normalisation to a valid pmf)
binoPosterior = Posterior * scaling_factor;
% Save cumulated posterior distributions
binoPosterior_cum{1,t} = binoPosterior;
binoPosterior_cum = binoPosterior_cum(~cellfun('isempty',binoPosterior_cum));
idx_currentbinoPosterior = size(binoPosterior_cum,2);
% Output posterior distribution (previous posterior shown as the prior)
figure('WindowState','maximized')
bar(xBinoPrior,binoPosterior_cum{1,idx_currentbinoPosterior-1},'FaceAlpha',0.3)
bar(xBinoPrior,binoPosterior,'FaceAlpha',0.3)
legend('Prior', 'Posterior')
xlabel('Proportion of positive image');ylabel('Probability of image appearance');
% Compute new probability of success
% Number of trials (from the length of pdf_values - 1)
n = length(binoPosterior) - 1;
% Objective function to minimize: least-squares binomial fit to the posterior
objective_function = @(p) sum((binoPosterior - binopdf(0:n, n, p)).^2);
% Use fminsearch to find the optimal p
[p0,fval,exitflag,output] = fminsearch(objective_function, p_initial);
disp(['Posterior probability of success (p): ', num2str(p0)]);
set(h, 'XData', (1:t)/fs, 'YData', p0_cum(1:t));
% NOTE(review): this single `end` cannot close the for/if/try/switch constructs
% opened above -- the remaining closing lines were presumably lost in extraction
end
Buffer-like moving window at: 1 seconds
Buffer-like moving window at: 1.5 seconds
Buffer-like moving window at: 2 seconds
Buffer-like moving window at: 2.5 seconds
Buffer-like moving window at: 3 seconds
Buffer-like moving window at: 3.5 seconds
Buffer-like moving window at: 4 seconds
Buffer-like moving window at: 4.5 seconds


Posterior probability of success (p): 0.54775
Buffer-like moving window at: 5 seconds


Posterior probability of success (p): 0.52246
Buffer-like moving window at: 5.5 seconds


Posterior probability of success (p): 0.54297
Buffer-like moving window at: 6 seconds
Buffer-like moving window at: 6.5 seconds
Buffer-like moving window at: 7 seconds


Posterior probability of success (p): 0.5
Buffer-like moving window at: 7.5 seconds


Posterior probability of success (p): 0.46396
Buffer-like moving window at: 8 seconds


Posterior probability of success (p): 0.48271
Buffer-like moving window at: 8.5 seconds
Buffer-like moving window at: 9 seconds
Buffer-like moving window at: 9.5 seconds


Posterior probability of success (p): 0.5
Buffer-like moving window at: 10 seconds


Posterior probability of success (p): 0.53115
Buffer-like moving window at: 10.5 seconds


Posterior probability of success (p): 0.51504
Buffer-like moving window at: 11 seconds


Posterior probability of success (p): 0.48594
Buffer-like moving window at: 11.5 seconds
Buffer-like moving window at: 12 seconds
Buffer-like moving window at: 12.5 seconds


Posterior probability of success (p): 0.51328
Buffer-like moving window at: 13 seconds


Posterior probability of success (p): 0.5
Buffer-like moving window at: 13.5 seconds


Posterior probability of success (p): 0.4874
Buffer-like moving window at: 14 seconds


Posterior probability of success (p): 0.51191
Buffer-like moving window at: 14.5 seconds
Buffer-like moving window at: 15 seconds
Buffer-like moving window at: 15.5 seconds


Posterior probability of success (p): 0.48867
Buffer-like moving window at: 16 seconds


Posterior probability of success (p): 0.5
Buffer-like moving window at: 16.5 seconds


Posterior probability of success (p): 0.52119
Buffer-like moving window at: 17 seconds
Buffer-like moving window at: 17.5 seconds
Buffer-like moving window at: 18 seconds


Posterior probability of success (p): 0.51035
Buffer-like moving window at: 18.5 seconds


Posterior probability of success (p): 0.5
Buffer-like moving window at: 19 seconds



Posterior probability of success (p): 0.49014

Buffer-like moving window at: 19.5 seconds
Buffer-like moving window at: 20 seconds